Large-scale sparse linear programming (LP) underpins critical applications in logistics, manufacturing, and scientific computing. However, conventional CPU-only solvers often fail to meet real-time performance demands as problem sizes reach millions of variables and constraints. This paper presents GCP-LP, a GPU-CPU collaborative framework that accelerates the COIN-OR Linear Programming solver by offloading key computational bottlenecks—including sparse matrix-vector multiplication (SpMV), pivot selection, and Cholesky factorization—to GPUs while retaining sequential control flow on the CPU. By combining asynchronous data transfer, GPU-optimized memory layouts, and adaptive load balancing, GCP-LP achieves up to 10% module-level and 5% overall runtime reductions on NETLIB and Mittelmann benchmarks.
@inproceedings{HHWLicpads25,
  address   = {Hefei, China},
  author    = {Huang, Zi-Rui and Hu, Yi-Xiang and Wu, Feng and Li, Xiang-Yang},
  booktitle = {2025 {IEEE} 31st International Conference on Parallel and Distributed Systems ({ICPADS})},
  doi       = {10.1109/ICPADS67057.2025.11323132},
  month     = dec,
  pages     = {1--8},
  title     = {{GCP-LP}: A {GPU}-{CPU} Collaborative Framework for Accelerating Large-Scale Sparse Linear Programming},
  year      = {2025}
}